{
    /*
     * Emit a PV trap trace record.  The record layout differs between
     * 32-bit and 64-bit PV guests (32- vs 64-bit instruction pointer),
     * so build the matching struct and tag the 64-bit form with
     * TRC_64_FLAG for the consumer.
     *
     * NOTE(review): resolved from unresolved diff markers left in the
     * file — the post-patch side of each hunk is kept.
     */
    if ( is_pv_32bit_vcpu(current) )
    {
        /* 4 + 2 + 2 bytes: naturally packed, no __packed needed. */
        struct {
            uint32_t eip;
            uint16_t trapnr:15;
            bool use_error_code:1;
            uint16_t error_code;
        } d = {
            .eip = eip,
            .trapnr = trapnr,
            .use_error_code = use_error_code,
            .error_code = error_code,
        };

        __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
    }
    else
    {
        /* 8 + 2 + 2 bytes: __packed avoids 4 bytes of tail padding. */
        struct __packed {
            uint64_t rip;
            uint16_t trapnr:15;
            bool use_error_code:1;
            uint16_t error_code;
        } d = {
            .rip = eip,
            .trapnr = trapnr,
            .use_error_code = use_error_code,
            .error_code = error_code,
        };

        __trace_var(TRC_PV_TRAP | TRC_64_FLAG, 1, sizeof(d), &d);
    }
}
/*
 * Emit a PV page-fault trace record: faulting %eip/%rip, linear address
 * and hardware error code, in the width matching the guest's mode.
 * NOTE(review): resolved from unresolved diff markers — post-patch side
 * of each hunk kept; enclosing function header is outside this chunk.
 */
if ( is_pv_32bit_vcpu(current) )
{
    /* Three uint32_t fields: naturally packed. */
    struct {
        uint32_t eip, addr, error_code;
    } d = {
        .eip = eip,
        .addr = addr,
        .error_code = error_code,
    };

    __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
}
else
{
    /* 8 + 8 + 4 bytes: __packed avoids 4 bytes of tail padding. */
    struct __packed {
        uint64_t rip, addr;
        uint32_t error_code;
    } d = {
        .rip = eip,
        .addr = addr,
        .error_code = error_code,
    };

    __trace_var(TRC_PV_PAGE_FAULT | TRC_64_FLAG, 1, sizeof(d), &d);
}
}
__trace_var(event, 1, sizeof(d), &d);
}
else
- {
- event |= TRC_64_FLAG;
- __trace_var(event, 1, sizeof(va), &va);
- }
+ __trace_var(event | TRC_64_FLAG, 1, sizeof(va), &va);
}
void __trace_trap_two_addr(unsigned event, unsigned long va1,
{
if ( is_pv_32bit_vcpu(current) )
{
- struct __packed {
- u32 va1, va2;
- } d;
- d.va1=va1;
- d.va2=va2;
+ struct {
+ uint32_t va1, va2;
+ } d = {
+ .va1 = va1,
+ .va2 = va2,
+ };
+
__trace_var(event, 1, sizeof(d), &d);
}
else
{
- struct __packed {
- unsigned long va1, va2;
- } d;
- d.va1=va1;
- d.va2=va2;
- event |= TRC_64_FLAG;
- __trace_var(event, 1, sizeof(d), &d);
+ struct {
+ uint64_t va1, va2;
+ } d = {
+ .va1 = va1,
+ .va2 = va2,
+ };
+
+ __trace_var(event | TRC_64_FLAG, 1, sizeof(d), &d);
}
}
{
    unsigned long eip = guest_cpu_user_regs()->rip;

    /*
     * Emit a writable-pagetable emulation trace record: the new PTE
     * value, the written address, and the instruction pointer.  The
     * PTE is always traced as a 64-bit integer (covers PAE guests);
     * only the address/IP width depends on the guest's mode.
     *
     * NOTE(review): resolved from unresolved diff markers left in the
     * file — the post-patch side of each hunk is kept, including the
     * l1e_get_intpte() conversion replacing the raw l1_pgentry_t copy.
     */
    if ( is_pv_32bit_vcpu(current) )
    {
        /* 8 + 4 + 4 bytes: naturally packed. */
        struct {
            uint64_t pte;
            uint32_t addr, eip;
        } d = {
            .pte = l1e_get_intpte(npte),
            .addr = addr,
            .eip = eip,
        };

        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
    }
    else
    {
        /* All 64-bit fields: naturally packed. */
        struct {
            uint64_t pte;
            uint64_t addr, rip;
        } d = {
            .pte = l1e_get_intpte(npte),
            .addr = addr,
            .rip = eip,
        };

        __trace_var(TRC_PV_PTWR_EMULATION | TRC_64_FLAG, 1, sizeof(d), &d);
    }
}